In [1]:
import numpy as np
import pandas as pd
import scipy
import nltk
import sklearn
import random
import re
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.naive_bayes import GaussianNB

Reuters-21578, ModApte version

A collection of 10,788 documents from the Reuters financial newswire service, partitioned into a training set of 7,769 documents and a test set of 3,019 documents.
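
The train/test partition is encoded in the file ids themselves: training documents live under training/ and test documents under test/. As a quick sketch (assuming the corpus has already been downloaded with nltk.download('reuters')), the split sizes can be confirmed directly from the file ids:

import nltk
reuters = nltk.corpus.reuters
train_ids = [f for f in reuters.fileids() if f.startswith('training/')]
test_ids = [f for f in reuters.fileids() if f.startswith('test/')]
print(len(train_ids), len(test_ids))  # should reflect the 7,769 / 3,019 split described above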


In [2]:
nltk.download('reuters')
nltk.download('punkt') # needed for tokenization


[nltk_data] Downloading package reuters to /home/felipe/nltk_data...
[nltk_data]   Package reuters is already up-to-date!
[nltk_data] Downloading package punkt to /home/felipe/nltk_data...
[nltk_data]   Package punkt is already up-to-date!
Out[2]:
True

In [3]:
dataset = nltk.corpus.reuters
dataset.root


Out[3]:
ZipFilePathPointer(u'/home/felipe/nltk_data/corpora/reuters.zip', u'reuters/')

In [4]:
# dataset.readme()

In [5]:
len(dataset.categories())


Out[5]:
90

In [6]:
len(dataset.fileids())


Out[6]:
10788

In [7]:
fileids = dataset.fileids()
sample_fileid = random.choice(fileids)  # pick one document at random
sample_fileid


Out[7]:
'training/3482'

In [8]:
dataset.abspath(sample_fileid)


Out[8]:
ZipFilePathPointer(u'/home/felipe/nltk_data/corpora/reuters.zip', u'reuters/training/3482')

In [9]:
len(dataset.words(sample_fileid))


Out[9]:
101

In [10]:
dataset.words(sample_fileid)


Out[10]:
[u'U', u'.', u'K', u'.', u'MONEY', u'MARKET', ...]

In [11]:
dataset.raw(sample_fileid)


Out[11]:
u"U.K. MONEY MARKET SHORTAGE FORECAST AT 300 MLN STG\n  The Bank of England said it forecast a\n  liquidity shortage of around 300 mln stg in the market today.\n      Among the main factors, the Bank said bills maturing in\n  official hands and the treasury bill take-up would drain 483\n  mln stg from the system while below target bankers' balances\n  and a rise in the note circulation would take out 50 mln and\n  100 mln stg respectively.\n      Partially offsetting these, exchequer transactions would\n  add around 355 mln stg, the Bank added.\n  \n\n"

In [12]:
dataset.words(sample_fileid)


Out[12]:
[u'U', u'.', u'K', u'.', u'MONEY', u'MARKET', ...]

In [13]:
dataset.sents(sample_fileid)


Out[13]:
[[u'U', u'.', u'K', u'.', u'MONEY', u'MARKET', u'SHORTAGE', u'FORECAST', u'AT', u'300', u'MLN', u'STG', u'The', u'Bank', u'of', u'England', u'said', u'it', u'forecast', u'a', u'liquidity', u'shortage', u'of', u'around', u'300', u'mln', u'stg', u'in', u'the', u'market', u'today', u'.'], [u'Among', u'the', u'main', u'factors', u',', u'the', u'Bank', u'said', u'bills', u'maturing', u'in', u'official', u'hands', u'and', u'the', u'treasury', u'bill', u'take', u'-', u'up', u'would', u'drain', u'483', u'mln', u'stg', u'from', u'the', u'system', u'while', u'below', u'target', u'bankers', u"'", u'balances', u'and', u'a', u'rise', u'in', u'the', u'note', u'circulation', u'would', u'take', u'out', u'50', u'mln', u'and', u'100', u'mln', u'stg', u'respectively', u'.'], ...]

In [14]:
dataset.paras(sample_fileid)


Out[14]:
[[[u'U', u'.', u'K', u'.', u'MONEY', u'MARKET', u'SHORTAGE', u'FORECAST', u'AT', u'300', u'MLN', u'STG', u'The', u'Bank', u'of', u'England', u'said', u'it', u'forecast', u'a', u'liquidity', u'shortage', u'of', u'around', u'300', u'mln', u'stg', u'in', u'the', u'market', u'today', u'.'], [u'Among', u'the', u'main', u'factors', u',', u'the', u'Bank', u'said', u'bills', u'maturing', u'in', u'official', u'hands', u'and', u'the', u'treasury', u'bill', u'take', u'-', u'up', u'would', u'drain', u'483', u'mln', u'stg', u'from', u'the', u'system', u'while', u'below', u'target', u'bankers', u"'", u'balances', u'and', u'a', u'rise', u'in', u'the', u'note', u'circulation', u'would', u'take', u'out', u'50', u'mln', u'and', u'100', u'mln', u'stg', u'respectively', u'.'], [u'Partially', u'offsetting', u'these', u',', u'exchequer', u'transactions', u'would', u'add', u'around', u'355', u'mln', u'stg', u',', u'the', u'Bank', u'added', u'.']]]

In [15]:
# http://scikit-learn.org/stable/modules/feature_extraction.html#text-feature-extraction
corpus_train = []
corpus_test = []
for fileid in dataset.fileids():
    document = dataset.raw(fileid)
    if re.match('training/',fileid):
        corpus_train.append(document)
    else:
        corpus_test.append(document)

In [16]:
len(corpus_train),len(corpus_test)


Out[16]:
(7769, 3019)

In [17]:
def preprocessor(string):
    # drop the '<' that marks ticker symbols such as <AIMS>, then lowercase everything
    repl = re.sub('<', '', string)
    return repl.lower()

In [18]:
vectorizer = CountVectorizer(
                min_df=10, # ignore terms that appear in fewer than 10 documents; raising this shrinks the feature vector
                strip_accents='ascii',
                preprocessor=preprocessor,
                stop_words='english')
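
min_df controls how rare a term may be before it is dropped from the vocabulary. An illustrative sketch (exact counts depend on the corpus and the other vectorizer settings) that refits the vectorizer with a few values and reports the resulting vocabulary size:

for min_df in (1, 5, 10, 50):
    v = CountVectorizer(min_df=min_df,
                        strip_accents='ascii',
                        preprocessor=preprocessor,
                        stop_words='english')
    v.fit(corpus_train + corpus_test)
    print(min_df, len(v.vocabulary_))  # vocabulary shrinks as min_df grows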

In [19]:
# the vectorizer is fit on both corpora; otherwise words that occur only in the
# training set or only in the test set would be missing from the vocabulary
full_corpus = corpus_train + corpus_test
vectorizer.fit(full_corpus)

X_train_counts = vectorizer.transform(corpus_train)
X_test_counts = vectorizer.transform(corpus_test)
X_full_counts = vectorizer.transform(full_corpus)

X_train_counts.shape,X_test_counts.shape, X_full_counts.shape


Out[19]:
((7769, 6462), (3019, 6462), (10788, 6462))

In [20]:
# uncomment these to see how the vectorizer analyzes, tokenizes and preprocesses documents

#vectorizer.build_analyzer()(dataset.raw(fileid))
#vectorizer.build_tokenizer()("ADVANCED INSTITUTIONAL <AIMS> CUTS WORKFORCE\n  Advanced Institutional ")
#vectorizer.build_preprocessor()("ADVANCED INSTITUTIONAL <AIMS> CUTS WORKFORCE\n  Advanced Institutional ")

In [21]:
X_train_counts[0].toarray().ravel()


Out[21]:
array([0, 0, 0, ..., 1, 0, 0])

In [22]:
X_test_counts[0].toarray().ravel()


Out[22]:
array([0, 0, 0, ..., 0, 0, 0])

In [23]:
transformer = TfidfTransformer()
# again, we need to fit the transformer to all documents (train and test)
transformer.fit(X_full_counts)

X_train_tfidf = transformer.transform(X_train_counts)
X_test_tfidf = transformer.transform(X_test_counts)
X_full_tfidf = transformer.transform(X_full_counts)

X_train_tfidf.shape, X_test_tfidf.shape, X_full_tfidf.shape


Out[23]:
((7769, 6462), (3019, 6462), (10788, 6462))
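
TfidfTransformer's default norm='l2' rescales each document vector to unit length, which is why the individual weights below are small. A quick sanity check (any document row with at least one retained term should have norm close to 1):

row = X_train_tfidf[0].toarray().ravel()
print(np.linalg.norm(row))  # expected to be very close to 1.0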

In [24]:
X_train_tfidf[0].toarray().ravel()


Out[24]:
array([ 0.       ,  0.       ,  0.       , ...,  0.0466051,  0.       ,  0.       ])

In [25]:
X_test_tfidf[0].toarray().ravel()


Out[25]:
array([ 0.,  0.,  0., ...,  0.,  0.,  0.])

In [26]:
Y_train = []
Y_test = []

for fileid in dataset.fileids():
    # join each document's categories into one string; get_dummies will split it back on '*'
    categories = '*'.join(dataset.categories(fileid))

    if re.match('training/',fileid):
        Y_train.append(categories)
    else:
        Y_test.append(categories)

series_train = pd.Series(Y_train)
Y_train_df = series_train.str.get_dummies(sep='*')

series_test = pd.Series(Y_test)
Y_test_df = series_test.str.get_dummies(sep='*')

Y_train = Y_train_df.values
Y_test = Y_test_df.values

Y_train.shape,Y_test.shape


Out[26]:
((7769, 90), (3019, 90))
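
The get_dummies trick above works because each document's categories were joined into a single '*'-separated string. A sketch of an equivalent route (not what was run above) using scikit-learn's MultiLabelBinarizer, which consumes the category lists directly; both approaches should yield columns in sorted category order:

from sklearn.preprocessing import MultiLabelBinarizer

train_ids = [f for f in dataset.fileids() if re.match('training/', f)]
test_ids = [f for f in dataset.fileids() if not re.match('training/', f)]

mlb = MultiLabelBinarizer(classes=sorted(dataset.categories()))
Y_train_alt = mlb.fit_transform(dataset.categories(f) for f in train_ids)
Y_test_alt = mlb.transform(dataset.categories(f) for f in test_ids)
# Y_train_alt.shape, Y_test_alt.shape should again be (7769, 90) and (3019, 90)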

In [27]:
%%time

clf = LogisticRegression()
meta_clf = OneVsRestClassifier(clf)

meta_clf.fit(X_train_tfidf,Y_train)


CPU times: user 6.15 s, sys: 3.64 ms, total: 6.16 s
Wall time: 6.18 s
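
OneVsRestClassifier fits one independent binary LogisticRegression per category, so after fitting there should be one estimator per label; each of them votes separately at prediction time, which is how a document can receive several labels (or none):

print(len(meta_clf.estimators_))  # one binary classifier per category, i.e. 90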

In [28]:
Y_pred = meta_clf.predict(X_test_tfidf)

In [29]:
f1_score(Y_test,Y_pred,average='micro')


Out[29]:
0.76201298701298703
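
precision_score and recall_score were imported above but never used. For completeness, a sketch of the corresponding micro-averaged precision and recall on the same predictions (micro-averaging pools all per-label decisions before computing the metric, so frequent categories dominate):

print(precision_score(Y_test, Y_pred, average='micro'))
print(recall_score(Y_test, Y_pred, average='micro'))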

In [ ]: